bitkeeper revision 1.1709.1.12 (42b1c2f3rtI0UPundZWVMA0I8cRUtA)
author: djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
Thu, 16 Jun 2005 18:20:35 +0000 (18:20 +0000)
committer: djm@kirby.fc.hp.com <djm@kirby.fc.hp.com>
Thu, 16 Jun 2005 18:20:35 +0000 (18:20 +0000)
adds hypercall support in HV for VTI

Signed-off-by: Anthony Xu  <Anthony.xu@intel.com>
Signed-off-by: Eddie Dong  <Eddie.dong@intel.com>
Signed-off-by: Kevin Tian  <Kevin.tian@intel.com>

xen/arch/ia64/Makefile
xen/arch/ia64/domain.c
xen/arch/ia64/vmmu.c
xen/arch/ia64/vmx_ivt.S
xen/arch/ia64/vmx_minstate.h
xen/arch/ia64/vtlb.c
xen/include/asm-ia64/tlb.h
xen/include/asm-ia64/vmmu.h
xen/include/asm-ia64/vmx_platform.h

index 2e59a7d19d5820d7fdd67a13520ca7d287ef4fdd..03f56326fbc1eb06d1a3d5dd2c859b157d26ade7 100644 (file)
@@ -15,7 +15,7 @@ OBJS = xensetup.o setup.o time.o irq.o ia64_ksyms.o process.o smp.o \
 ifeq ($(CONFIG_VTI),y)
 OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \
        vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \
-       vtlb.o mmio.o vlsapic.o
+       vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o
 endif
 # perfmon.o
 # unwind.o needed for kernel unwinding (rare)
index 611e82b59440f1e1aee689fbf005f54cbc837a1f..40a38b2e07eac89bec69ab39a9138fe8de373fec 100644 (file)
@@ -194,21 +194,21 @@ void arch_do_createdomain(struct vcpu *v)
        memset(ti, 0, sizeof(struct thread_info));
        init_switch_stack(v);
 
-       /* If domain is VMX domain, shared info area is created
-        * by domain and then domain notifies HV by specific hypercall.
-        * If domain is xenolinux, shared info area is created by
-        * HV.
-        * Since we have no idea about whether domain is VMX now,
-        * (dom0 when parse and domN when build), postpone possible
-        * allocation.
-        */
+       /* Shared info area is required to be allocated at domain
+        * creation, since control panel will write some I/O info
+        * between front end and back end to that area. However for
+        * vmx domain, our design is to let domain itself to allocate
+        * shared info area, to keep machine page contiguous. So this
+        * page will be released later when domainN issues request
+        * after up.
+        */
+       d->shared_info = (void *)alloc_xenheap_page();
 
        /* FIXME: Because full virtual cpu info is placed in this area,
         * it's unlikely to put it into one shareinfo page. Later
         * need split vcpu context from vcpu_info and conforms to
         * normal xen convention.
         */
-       d->shared_info = NULL;
        v->vcpu_info = (void *)alloc_xenheap_page();
        if (!v->vcpu_info) {
                printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
index c39d6f285174c30c298d46618b6e416514302ec7..60126b23b2b0816887af5b588c425f26ff0b0a78 100644 (file)
@@ -454,12 +454,13 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
     data.itir=itir;
     data.vadr=PAGEALIGN(ifa,data.ps);
-    data.section=THASH_TLB_TC;
+    data.tc = 1;
     data.cl=ISIDE_TLB;
     vmx_vcpu_get_rr(vcpu, ifa, &vrr);
     data.rid = vrr.rid;
     
-    sections.v = THASH_SECTION_TR;
+    sections.tr = 1;
+    sections.tc = 0;
 
     ovl = thash_find_overlap(hcb, &data, sections);
     while (ovl) {
@@ -467,9 +468,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
         panic("Tlb conflict!!");
         return;
     }
-    sections.v = THASH_SECTION_TC;
-    thash_purge_entries(hcb, &data, sections);
-    thash_insert(hcb, &data, ifa);
+    thash_purge_and_insert(hcb, &data);
     return IA64_NO_FAULT;
 }
 
@@ -488,11 +487,12 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
     data.itir=itir;
     data.vadr=PAGEALIGN(ifa,data.ps);
-    data.section=THASH_TLB_TC;
+    data.tc = 1;
     data.cl=DSIDE_TLB;
     vmx_vcpu_get_rr(vcpu, ifa, &vrr);
     data.rid = vrr.rid;
-    sections.v = THASH_SECTION_TR;
+    sections.tr = 1;
+    sections.tc = 0;
 
     ovl = thash_find_overlap(hcb, &data, sections);
     if (ovl) {
@@ -500,42 +500,27 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa)
         panic("Tlb conflict!!");
         return;
     }
-    sections.v = THASH_SECTION_TC;
-    thash_purge_entries(hcb, &data, sections);
-    thash_insert(hcb, &data, ifa);
+    thash_purge_and_insert(hcb, &data);
     return IA64_NO_FAULT;
 }
 
-IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va)
+/*
+ * Return TRUE/FALSE for success of lock operation
+ */
+int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock)
 {
 
-    thash_data_t data, *ovl;
     thash_cb_t  *hcb;
-    search_section_t sections;
-    rr_t    vrr;
+    rr_t  vrr;
+    u64          preferred_size;
 
-    hcb = vmx_vcpu_get_vtlb(vcpu);
-    data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
-    data.itir=0;
-    data.ps = ps;
-    data.vadr=PAGEALIGN(va,ps);
-    data.section=THASH_TLB_FM;
-    data.cl=DSIDE_TLB;
     vmx_vcpu_get_rr(vcpu, va, &vrr);
-    data.rid = vrr.rid;
-    sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM;
-
-    ovl = thash_find_overlap(hcb, &data, sections);
-    if (ovl) {
-          // generate MCA.
-        panic("Foreignmap Tlb conflict!!");
-        return;
-    }
-    thash_insert(hcb, &data, va);
-    return IA64_NO_FAULT;
+    hcb = vmx_vcpu_get_vtlb(vcpu);
+    va = PAGEALIGN(va,vrr.ps);
+    preferred_size = PSIZE(vrr.ps);
+    return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock);
 }
 
-
 IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx)
 {
 
@@ -548,11 +533,12 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
     data.itir=itir;
     data.vadr=PAGEALIGN(ifa,data.ps);
-    data.section=THASH_TLB_TR;
+    data.tc = 0;
     data.cl=ISIDE_TLB;
     vmx_vcpu_get_rr(vcpu, ifa, &vrr);
     data.rid = vrr.rid;
-    sections.v = THASH_SECTION_TR;
+    sections.tr = 1;
+    sections.tc = 0;
 
     ovl = thash_find_overlap(hcb, &data, sections);
     if (ovl) {
@@ -560,7 +546,8 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
         panic("Tlb conflict!!");
         return;
     }
-    sections.v=THASH_SECTION_TC;
+    sections.tr = 0;
+    sections.tc = 1;
     thash_purge_entries(hcb, &data, sections);
     thash_tr_insert(hcb, &data, ifa, idx);
     return IA64_NO_FAULT;
@@ -579,11 +566,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
     data.page_flags=pte & ~PAGE_FLAGS_RV_MASK;
     data.itir=itir;
     data.vadr=PAGEALIGN(ifa,data.ps);
-    data.section=THASH_TLB_TR;
+    data.tc = 0;
     data.cl=DSIDE_TLB;
     vmx_vcpu_get_rr(vcpu, ifa, &vrr);
     data.rid = vrr.rid;
-    sections.v = THASH_SECTION_TR;
+    sections.tr = 1;
+    sections.tc = 0;
 
     ovl = thash_find_overlap(hcb, &data, sections);
     while (ovl) {
@@ -591,7 +579,8 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64
         panic("Tlb conflict!!");
         return;
     }
-    sections.v=THASH_SECTION_TC;
+    sections.tr = 0;
+    sections.tc = 1;
     thash_purge_entries(hcb, &data, sections);
     thash_tr_insert(hcb, &data, ifa, idx);
     return IA64_NO_FAULT;
@@ -607,7 +596,8 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT64 vadr,UINT64 ps)
 
     hcb = vmx_vcpu_get_vtlb(vcpu);
     rr=vmx_vcpu_rr(vcpu,vadr);
-    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+    sections.tr = 1;
+    sections.tc = 1;
     thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB);
     return IA64_NO_FAULT;
 }
@@ -619,7 +609,8 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT64 vadr,UINT64 ps)
     search_section_t sections;
     hcb = vmx_vcpu_get_vtlb(vcpu);
     rr=vmx_vcpu_rr(vcpu,vadr);
-    sections.v = THASH_SECTION_TR | THASH_SECTION_TC;
+    sections.tr = 1;
+    sections.tc = 1;
     thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB);
     return IA64_NO_FAULT;
 }
@@ -632,7 +623,8 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UINT64 vadr, UINT64 ps)
     thash_data_t data, *ovl;
     hcb = vmx_vcpu_get_vtlb(vcpu);
     vrr=vmx_vcpu_rr(vcpu,vadr);
-    sections.v = THASH_SECTION_TC;
+    sections.tr = 0;
+    sections.tc = 1;
     vadr = PAGEALIGN(vadr, ps);
 
     thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
index 9647386a8c5524d0b0a965962ec4a44030d3ce79..675315de1bd71b8143fd2369d3cdfb299443a7ca 100644 (file)
@@ -180,7 +180,7 @@ ENTRY(vmx_dtlb_miss)
     mov r29=cr.ipsr;
     ;;
     tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
-(p6)br.sptk vmx_fault_1
+(p6)br.sptk vmx_fault_2
     mov r16 = cr.ifa
     ;;
     thash r17 = r16
@@ -346,7 +346,12 @@ END(vmx_daccess_bit)
 ENTRY(vmx_break_fault)
        mov r31=pr
     mov r19=11
-    br.sptk.many vmx_dispatch_break_fault
+    mov r30=cr.iim
+    mov r29=0x1100
+    ;;
+    cmp4.eq  p6,p7=r29,r30
+    (p6) br.dptk.few vmx_hypercall_dispatch
+    (p7) br.sptk.many vmx_dispatch_break_fault
 END(vmx_break_fault)
 
        .org vmx_ia64_ivt+0x3000
@@ -929,10 +934,9 @@ END(vmx_dispatch_tlb_miss)
 
 
 ENTRY(vmx_dispatch_break_fault)
-    cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
-    ;;
     VMX_SAVE_MIN_WITH_COVER_R19
     ;;
+    ;;
     alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
     mov out0=cr.ifa
     adds out1=16,sp
@@ -951,9 +955,37 @@ ENTRY(vmx_dispatch_break_fault)
     ;;
     mov rp=r14
     br.call.sptk.many b6=vmx_ia64_handle_break
+    ;;
 END(vmx_dispatch_break_fault)
 
 
+ENTRY(vmx_hypercall_dispatch)
+    VMX_SAVE_MIN_WITH_COVER
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    ssm psr.i               // restore psr.i
+    adds r3=16,r2                // set up second base pointer
+    ;;
+    VMX_SAVE_REST
+    ;;
+    movl r14=ia64_leave_hypervisor
+    movl r2=hyper_call_table
+    ;;
+    mov rp=r14
+    shladd r2=r15,3,r2
+    ;;
+    ld8 r2=[r2]
+    ;;
+    mov b6=r2
+    ;;
+    br.call.sptk.many b6=b6
+    ;;
+END(vmx_hypercall_dispatch)
+
+
+
 ENTRY(vmx_dispatch_interrupt)
     cmp.ne pEml,pNonEml=r0,r0       /* force pNonEml =1, don't save r4 ~ r7 */
     ;;
@@ -976,3 +1008,39 @@ ENTRY(vmx_dispatch_interrupt)
        mov rp=r14
        br.call.sptk.many b6=vmx_ia64_handle_irq
 END(vmx_dispatch_interrupt)
+
+
+
+    .rodata
+    .align 8
+    .globl hyper_call_table
+hyper_call_table:
+    data8 hyper_not_support     //hyper_set_trap_table     /*  0 */
+    data8 hyper_mmu_update
+    data8 hyper_not_support     //hyper_set_gdt
+    data8 hyper_not_support     //hyper_stack_switch
+    data8 hyper_not_support     //hyper_set_callbacks
+    data8 hyper_not_support     //hyper_fpu_taskswitch     /*  5 */
+    data8 hyper_sched_op
+    data8 hyper_dom0_op
+    data8 hyper_not_support     //hyper_set_debugreg
+    data8 hyper_not_support     //hyper_get_debugreg
+    data8 hyper_not_support     //hyper_update_descriptor  /* 10 */
+    data8 hyper_not_support     //hyper_set_fast_trap
+    data8 hyper_dom_mem_op
+    data8 hyper_not_support     //hyper_multicall
+    data8 hyper_not_support     //hyper_update_va_mapping
+    data8 hyper_not_support     //hyper_set_timer_op       /* 15 */
+    data8 hyper_event_channel_op
+    data8 hyper_xen_version
+    data8 hyper_not_support     //hyper_console_io
+    data8 hyper_not_support     //hyper_physdev_op
+    data8 hyper_not_support     //hyper_grant_table_op     /* 20 */
+    data8 hyper_not_support     //hyper_vm_assist
+    data8 hyper_not_support     //hyper_update_va_mapping_otherdomain
+    data8 hyper_not_support     //hyper_switch_vm86
+    data8 hyper_not_support     //hyper_boot_vcpu
+    data8 hyper_not_support     //hyper_ni_hypercall       /* 25 */
+    data8 hyper_not_support     //hyper_mmuext_op
+    data8 hyper_lock_page
+    data8 hyper_set_shared_page
index afee6516d91802f5b73dc75eede7e3677d98e88a..76f8e7f065e098ffaff35f7b9a0e77145d6be70e 100644 (file)
     ;;                  \
 .mem.offset 0,0; st8.spill [r4]=r20,16;     \
 .mem.offset 8,0; st8.spill [r5]=r21,16;     \
-    mov r18=b6;         \
     ;;                  \
 .mem.offset 0,0; st8.spill [r4]=r22,16;     \
 .mem.offset 8,0; st8.spill [r5]=r23,16;     \
-    mov r19=b7;     \
     ;;                  \
 .mem.offset 0,0; st8.spill [r4]=r24,16;     \
 .mem.offset 8,0; st8.spill [r5]=r25,16;     \
     ;;                  \
 .mem.offset 0,0; st8.spill [r4]=r28,16;     \
 .mem.offset 8,0; st8.spill [r5]=r29,16;     \
+    mov r26=b6;         \
     ;;                  \
 .mem.offset 0,0; st8.spill [r4]=r30,16;     \
 .mem.offset 8,0; st8.spill [r5]=r31,16;     \
+    mov r27=b7;     \
     ;;                  \
     mov r30=ar.unat;    \
     ;;      \
     adds r2=PT(B6)-PT(F10),r2;      \
     adds r3=PT(B7)-PT(F11),r3;      \
     ;;          \
-    st8 [r2]=r18,16;       /* b6 */    \
-    st8 [r3]=r19,16;       /* b7 */    \
+    st8 [r2]=r26,16;       /* b6 */    \
+    st8 [r3]=r27,16;       /* b7 */    \
     ;;                  \
     st8 [r2]=r9;           /* ar.csd */    \
     st8 [r3]=r10;          /* ar.ssd */    \
index 6cbb4478b7e95165dd90757594e7fbe4cc6a7503..86565531bffd2e93300639ddea2680b6eb807854 100644 (file)
@@ -252,7 +252,7 @@ static thash_data_t *_vtlb_next_overlap_in_chain(thash_cb_t *hcb)
 
     /* Find overlap TLB entry */
     for (cch=priv->cur_cch; cch; cch = cch->next) {
-        if ( ((1UL<<cch->section) & priv->s_sect.v) &&
+        if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr )  &&
             __is_tlb_overlap(hcb, cch, priv->rid, priv->cl,
                 priv->_curva, priv->_eva) ) {
             return cch;
@@ -322,7 +322,7 @@ int __tlb_to_vhpt(thash_cb_t *hcb,
 
 void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx)
 {
-    if ( hcb->ht != THASH_TLB || entry->section != THASH_TLB_TR ) {
+    if ( hcb->ht != THASH_TLB || entry->tc ) {
         panic("wrong parameter\n");
     }
     entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
@@ -356,7 +356,7 @@ thash_data_t *__alloc_chain(thash_cb_t *hcb,thash_data_t *entry)
  *  3: The caller need to make sure the new entry will not overlap 
  *     with any existed entry.
  */
-static void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
+void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
     thash_data_t    *hash_table, *cch;
     rr_t  vrr;
@@ -411,7 +411,7 @@ void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
     rr_t  vrr;
     
     vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
-    if ( entry->ps != vrr.ps && entry->section==THASH_TLB_TC) {
+    if ( entry->ps != vrr.ps && entry->tc ) {
         panic("Not support for multiple page size now\n");
     }
     entry->vadr = PAGEALIGN(entry->vadr,entry->ps);
@@ -450,7 +450,7 @@ static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
     thash_internal_t *priv = &hcb->priv;
     int idx;
     
-    if ( entry->section == THASH_TLB_TR ) {
+    if ( !entry->tc ) {
         return rem_tr(hcb, entry->cl, entry->tr_idx);
     }
     rem_thash(hcb, entry);
@@ -525,19 +525,19 @@ thash_data_t *thash_find_overlap(thash_cb_t *hcb,
             thash_data_t *in, search_section_t s_sect)
 {
     return (hcb->find_overlap)(hcb, in->vadr, 
-            in->ps, in->rid, in->cl, s_sect);
+            PSIZE(in->ps), in->rid, in->cl, s_sect);
 }
 
 static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, 
-        u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
+        u64 va, u64 size, int rid, char cl, search_section_t s_sect)
 {
     thash_data_t    *hash_table;
     thash_internal_t *priv = &hcb->priv;
     u64     tag;
     rr_t    vrr;
 
-    priv->_curva = PAGEALIGN(va,ps);
-    priv->_eva = priv->_curva + PSIZE(ps);
+    priv->_curva = va & ~(size-1);
+    priv->_eva = priv->_curva + size;
     priv->rid = rid;
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
     priv->ps = vrr.ps;
@@ -553,15 +553,15 @@ static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb,
 }
 
 static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, 
-        u64 va, u64 ps, int rid, char cl, search_section_t s_sect)
+        u64 va, u64 size, int rid, char cl, search_section_t s_sect)
 {
     thash_data_t    *hash_table;
     thash_internal_t *priv = &hcb->priv;
     u64     tag;
     rr_t    vrr;
 
-    priv->_curva = PAGEALIGN(va,ps);
-    priv->_eva = priv->_curva + PSIZE(ps);
+    priv->_curva = va & ~(size-1);
+    priv->_eva = priv->_curva + size;
     priv->rid = rid;
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
     priv->ps = vrr.ps;
@@ -691,13 +691,46 @@ void thash_purge_entries_ex(thash_cb_t *hcb,
 {
     thash_data_t    *ovl;
 
-    ovl = (hcb->find_overlap)(hcb, va, ps, rid, cl, p_sect);
+    ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect);
     while ( ovl != NULL ) {
         (hcb->rem_hash)(hcb, ovl);
         ovl = (hcb->next_overlap)(hcb);
     };
 }
 
+/*
+ * Purge overlap TCs and then insert the new entry to emulate itc ops.
+ *    Notes: Only TC entry can purge and insert.
+ */
+void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in)
+{
+    thash_data_t    *ovl;
+    search_section_t sections;
+
+#ifdef   XEN_DEBUGGER
+    vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr);
+       if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) {
+               panic ("Oops, wrong call for purge_and_insert\n");
+               return;
+       }
+#endif
+    in->vadr = PAGEALIGN(in->vadr,in->ps);
+    in->ppn = PAGEALIGN(in->ppn, in->ps-12);
+    sections.tr = 0;
+    sections.tc = 1;
+    ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps),
+                                in->rid, in->cl, sections);
+    if(ovl)
+        (hcb->rem_hash)(hcb, ovl);
+#ifdef   XEN_DEBUGGER
+    ovl = (hcb->next_overlap)(hcb);
+    if ( ovl ) {
+               panic ("Oops, 2+ overlaps for purge_and_insert\n");
+               return;
+    }
+#endif
+    (hcb->ins_hash)(hcb, in, in->vadr);
+}
 
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
@@ -766,6 +799,42 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb,
     return NULL;
 }
 
+/*
+ * Lock/Unlock TC if found.
+ *     NOTES: Only the page in preferred size can be handled.
+ *   return:
+ *          1: failure
+ *          0: success
+ */
+int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock)
+{
+       thash_data_t    *ovl;
+       search_section_t        sections;
+
+    sections.tr = 1;
+    sections.tc = 1;
+       ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections);
+       if ( ovl ) {
+               if ( !ovl->tc ) {
+//                     panic("Oops, TR for lock\n");
+                       return 0;
+               }
+               else if ( lock ) {
+                       if ( ovl->locked ) {
+                               DPRINTK("Oops, already locked entry\n");
+                       }
+                       ovl->locked = 1;
+               }
+               else if ( !lock ) {
+                       if ( !ovl->locked ) {
+                               DPRINTK("Oops, already unlocked entry\n");
+                       }
+                       ovl->locked = 0;
+               }
+               return 0;
+       }
+       return 1;
+}
 
 /*
  * Notifier when TLB is deleted from hash table and its collision chain.
@@ -824,7 +893,6 @@ void thash_init(thash_cb_t *hcb, u64 sz)
     }
 }
 
-
 #ifdef  VTLB_DEBUG
 static  u64 cch_length_statistics[MAX_CCH_LENGTH+1];
 u64  sanity_check=0;
index 7947bf3dccace684629ac3e7ac1a49b47b67e83f..049f7b5f21eae6df183c6a8f38ca165e46ad4cac 100644 (file)
@@ -39,11 +39,11 @@ typedef struct {
 typedef union {
         unsigned long   value;
         struct {
-                uint64_t ve : 1;
-                uint64_t rv1 : 1;
-                uint64_t ps  : 6;
-                uint64_t rid : 24;
-                uint64_t rv2 : 32;
+                unsigned long ve : 1;
+                unsigned long rv1 : 1;
+                unsigned long ps  : 6;
+                unsigned long rid : 24;
+                unsigned long rv2 : 32;
         };
 } rr_t;
 #endif // CONFIG_VTI
index cee7d89a904c5892952d2cbfe6aaaab32feabf16..8464c929ac859ec886b5ab738b1b7c2f1d57caad 100644 (file)
 #include "public/xen.h"
 #include "asm/tlb.h"
 
-#define         THASH_TLB_TR            0
-#define         THASH_TLB_TC            1
-#define         THASH_TLB_FM            2       // foreign map
+//#define         THASH_TLB_TR            0
+//#define         THASH_TLB_TC            1
 
-#define         THASH_SECTION_TR        (1<<0)
-#define         THASH_SECTION_TC        (1<<1)
-#define         THASH_SECTION_FM        (1<<2)
+
+// bit definition of TR, TC search combination
+//#define         THASH_SECTION_TR        (1<<0)
+//#define         THASH_SECTION_TC        (1<<1)
 
 /*
  * Next bit definition must be same with THASH_TLB_XX
@@ -43,8 +43,7 @@ typedef union search_section {
         struct {
                 u32 tr : 1;
                 u32 tc : 1;
-                u32 fm : 1;
-                u32 rsv: 29;
+                u32 rsv: 30;
         };
         u32     v;
 } search_section_t;
@@ -80,12 +79,10 @@ typedef struct thash_data {
             u64 ig1  :  11; //53-63
         };
         struct {
-            u64 __rv1 : 12;
-            // sizeof(domid_t) must be less than 38!!! Refer to its definition
-            u64 fm_dom : 38; // 12-49 foreign map domain ID
-            u64 __rv2 : 3;   // 50-52
+            u64 __rv1 : 53;    // 0-52
             // next extension to ig1, only for TLB instance
-            u64 section : 2;     // 53-54 TR, TC or FM (thash_TLB_XX)
+            u64 tc : 1;     // 53 TR or TC
+            u64 locked  : 1;   // 54 entry locked or not
             CACHE_LINE_TYPE cl : 1; // I side or D side cache line
             u64 nomap : 1;   // entry cann't be inserted into machine TLB.
             u64 __ig1  :  5; // 56-61
@@ -227,8 +224,8 @@ typedef struct thash_cb {
            INVALID_ENTRY(hcb, hash) = 1;        \
            hash->next = NULL; }
 
-#define PURGABLE_ENTRY(hcb,en)          \
-                ((hcb)->ht == THASH_VHPT || (en)->section == THASH_TLB_TC)
+#define PURGABLE_ENTRY(hcb,en)  \
+               ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) )
 
 
 /*
@@ -306,7 +303,7 @@ extern void thash_purge_entries_ex(thash_cb_t *hcb,
                         u64 rid, u64 va, u64 sz, 
                         search_section_t p_sect, 
                         CACHE_LINE_TYPE cl);
-extern thash_cb_t *init_domain_tlb(struct vcpu *d);
+extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in);
 
 /*
  * Purge all TCs or VHPT entries including those in Hash table.
@@ -323,6 +320,7 @@ extern thash_data_t *vtlb_lookup(thash_cb_t *hcb,
                         thash_data_t *in);
 extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb, 
                         u64 rid, u64 va,CACHE_LINE_TYPE cl);
+extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock);
 
 
 #define   ITIR_RV_MASK      (((1UL<<32)-1)<<32 | 0x3)
@@ -332,6 +330,7 @@ extern u64 machine_thash(PTA pta, u64 va, u64 rid, u64 ps);
 extern void purge_machine_tc_by_domid(domid_t domid);
 extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb);
 extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va);
+extern thash_cb_t *init_domain_tlb(struct vcpu *d);
 
 #define   VTLB_DEBUG
 #ifdef   VTLB_DEBUG
index bf59e61feca046762538bf480910aa7e248865c0..37560863fa66359232fb5b94895b1dc2ecfbed76 100644 (file)
@@ -25,7 +25,7 @@
 struct mmio_list;
 typedef struct virutal_platform_def {
     //unsigned long          *real_mode_data; /* E820, etc. */
-    //unsigned long          shared_page_va;
+    unsigned long          shared_page_va;
     //struct vmx_virpit_t    vmx_pit;
     //struct vmx_handler_t   vmx_handler;
     //struct mi_per_cpu_info mpci;            /* MMIO */